Batch Normalization in gluon


In [1]:
import mxnet as mx

import numpy as np
from mxnet import gluon
from tqdm import tqdm_notebook as tqdm

Context


In [2]:
ctx = mx.cpu()

MNIST Dataset


In [3]:
batch_size = 64
num_inputs = 784
num_outputs = 10

In [4]:
def transform(data, label):
    return mx.nd.transpose(data.astype(np.float32), (2, 0, 1)) / 255, label.astype(np.float32)

In [5]:
train_data = gluon.data.DataLoader(dataset=gluon.data.vision.MNIST(train=True, transform=transform),
                                   batch_size=batch_size,
                                   shuffle=True)
test_data = gluon.data.DataLoader(dataset=gluon.data.vision.MNIST(train=False, transform=transform),
                                  batch_size=batch_size,
                                  shuffle=False)

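A quick sanity check on the loaders: with this transform, every batch should come out channel-first, i.e. images of shape (batch_size, 1, 28, 28) in float32, with labels of shape (batch_size,). A minimal peek at one batch (only a check, not required for training):

for data, label in train_data:
    print(data.shape, data.dtype)   # expected: (64, 1, 28, 28) float32
    print(label.shape)              # expected: (64,)
    break
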
CNN with Batch Normalization


In [6]:
num_fc = 512

In [7]:
net = gluon.nn.Sequential()
with net.name_scope():
    net.add(gluon.nn.Conv2D(channels=20,
                            kernel_size=5))
    net.add(gluon.nn.BatchNorm(axis=1,
                               center=True,
                               scale=True))
    net.add(gluon.nn.Activation(activation='relu'))
    net.add(gluon.nn.MaxPool2D(pool_size=2,
                               strides=2))

    net.add(gluon.nn.Conv2D(channels=50,
                            kernel_size=5))
    net.add(gluon.nn.BatchNorm(axis=1,
                               center=True,
                               scale=True))
    net.add(gluon.nn.Activation(activation='relu'))
    net.add(gluon.nn.MaxPool2D(pool_size=2,
                               strides=2))
    # The Flatten layer collapses all axes except the first (batch) axis into a single axis.
    net.add(gluon.nn.Flatten())
    net.add(gluon.nn.Dense(units=num_fc))
    net.add(gluon.nn.BatchNorm(axis=1,
                               center=True,
                               scale=True))
    net.add(gluon.nn.Activation(activation='relu'))
    net.add(gluon.nn.Dense(units=num_outputs))

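For the convolutional layers, axis=1 is the channel axis of the NCHW activations, so each BatchNorm layer keeps one mean/variance (and one learned gamma/beta) per channel, computed over the batch and spatial dimensions. A minimal sketch of that normalization on a toy tensor (the eps value here is chosen arbitrarily for illustration):

x = mx.nd.random.normal(shape=(4, 3, 8, 8))                  # toy (N, C, H, W) activations
mean = x.mean(axis=(0, 2, 3), keepdims=True)                 # one mean per channel
var = ((x - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)  # one variance per channel
x_hat = (x - mean) / mx.nd.sqrt(var + 1e-5)                  # BatchNorm then applies gamma * x_hat + beta
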
Parameter initialization


In [8]:
net.collect_params().initialize(mx.init.Xavier(magnitude=2.24),
                                ctx=ctx)

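Because the Conv2D and Dense blocks above are created without explicit input sizes, gluon defers allocating their weights until the first forward pass. A dummy batch can be pushed through to trigger shape inference and confirm the final output shape (again just a sanity check):

dummy = mx.nd.zeros(shape=(batch_size, 1, 28, 28), ctx=ctx)
print(net(dummy).shape)  # expected: (64, 10), one score per class
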
Softmax cross-entropy loss


In [9]:
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()

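SoftmaxCrossEntropyLoss applies the softmax internally, so the network's raw scores (logits) and integer class labels are passed in directly. A tiny illustration with made-up numbers:

logits = mx.nd.array([[2.0, 0.5, -1.0]])     # unnormalized scores for 3 classes
label = mx.nd.array([0])                     # the correct class is 0
print(softmax_cross_entropy(logits, label))  # a small loss, since class 0 already has the largest score
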
Optimizer


In [10]:
trainer = gluon.Trainer(params=net.collect_params(),
                        optimizer='sgd',
                        optimizer_params={'learning_rate': .1})

Accuracy evaluation


In [11]:
def evaluate_accuracy(data_iterator, net):
    acc = mx.metric.Accuracy()
    for i, (data, label) in enumerate(data_iterator):
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx)
        output = net(data)
        predictions = mx.nd.argmax(data=output,
                                   axis=1)
        acc.update(preds=predictions,
                   labels=label)
    return acc.get()[1]

Training


In [12]:
epochs = 1
smoothing_constant = .01

In [13]:
for e in range(epochs):
    for i, (data, label) in enumerate(tqdm(train_data)):
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx)
        with mx.autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
        trainer.step(data.shape[0])

        ##########################
        #  Keep a moving average of the losses
        ##########################
        curr_loss = mx.nd.mean(loss).asscalar()
        moving_loss = (curr_loss if ((i == 0) and (e == 0))
                       else (1 - smoothing_constant) * moving_loss + (smoothing_constant) * curr_loss)

    test_accuracy = evaluate_accuracy(test_data, net)
    train_accuracy = evaluate_accuracy(train_data, net)
    print("Epoch %s. Loss: %s, Train_acc %s, Test_acc %s" % (e, moving_loss, train_accuracy, test_accuracy))


Epoch 0. Loss: 0.05218624415229819, Train_acc 0.9908666666666667, Test_acc 0.9879
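
Note that the evaluation passes above run outside of autograd.record(), so each BatchNorm layer normalizes with the running statistics accumulated during training rather than with per-batch statistics. One way to inspect those statistics after training (indexing into the Sequential block is just a convenience here):

bn = net[1]                          # the first BatchNorm layer in the Sequential block
print(bn.running_mean.data().shape)  # expected: (20,), one statistic per channel
print(bn.gamma.data().shape)         # learned per-channel scale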